import cv2
import numpy as np
import pandas as pd
import math
import json
import copy
import maxflow as mf
from matplotlib import pyplot as plt
from scipy.signal import convolve2d
from sklearn.ensemble import AdaBoostClassifier
from sklearn import metrics
from operator import itemgetter
# Load the source aerial image (OpenCV reads BGR) and preview it in RGB.
img = cv2.imread('../data/images/austrailian-mango-plantation.jpg')
f = plt.figure(figsize=(15,20))
ax = f.add_subplot(111)
ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
def load_annotations(file):
    """Load a VIA-style circle-annotation JSON file.

    Parameters
    ----------
    file : str
        Path to the annotation JSON file.

    Returns
    -------
    dict
        Parsed annotations keyed by "<filename><filesize>".
    """
    with open(file) as json_file:
        # json.load parses directly from the file object
        # (was json.loads(json_file.read()) — an unnecessary extra copy).
        return json.load(json_file)
def draw_annotations(img, annotation, color, outline_width):
    """Draw every annotated circle onto img (modified in place) and return it.

    annotation is a VIA-style dict; only its first top-level entry is used.
    outline_width of -1 fills the circles (used later to build ground truth).
    """
    first_key = next(iter(annotation))
    for region in annotation[first_key]["regions"].values():
        shape = region["shape_attributes"]
        cv2.circle(img, (shape["cx"], shape["cy"]), shape["r"], color, outline_width)
    return img
# Load the circle annotations and preview them over the full-resolution image.
annotations = load_annotations('../data/annotations/austrailian-mango-plantation.json')
annotated_img = draw_annotations(img.copy(), annotations, (226,230,209), 12)
f = plt.figure(figsize=(15,20))
ax = f.add_subplot(111)
ax.imshow(cv2.cvtColor(annotated_img, cv2.COLOR_BGR2RGB))
plt.show()
Downsampling the image from (3000, 4000) to (900, 1200) to speed up processing. (Might remove later when the entire pipeline is finished.)
# Downsample to 900x1200 — note cv2.resize takes (width, height), hence (1200, 900).
img = cv2.resize(img, (1200, 900))
f = plt.figure(figsize=(15,20))
ax = f.add_subplot(111)
ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
(Might remove later when the entire pipeline is finished.)
def resize_annotations(annotations, downscale):
    """Divide every circle annotation's centre and radius by downscale.

    Mutates the annotation dict in place (only the first top-level entry)
    and returns it, truncating each scaled value to int.
    """
    key = next(iter(annotations))
    for region in annotations[key]["regions"].values():
        shape = region["shape_attributes"]
        for field in ("cx", "cy", "r"):
            shape[field] = int(shape[field] / downscale)
    return annotations
# Rescale the annotations by the same factor as the image (3000/900 ≈ 3.333...)
# and preview them over the downsampled image.
annotations = resize_annotations(annotations, 3.33333333333)
annotated_img = draw_annotations(img.copy(), annotations, (226,230,209), 4)
f = plt.figure(figsize=(15,20))
ax = f.add_subplot(111)
ax.imshow(cv2.cvtColor(annotated_img, cv2.COLOR_BGR2RGB))
plt.show()
def split_mark(annotations, train_percent):
    """Return the coordinate separating the first train_percent of annotated
    trees (ordered by their lowest point cy + r) from the rest.

    Fix: the annotation key was hard-coded as
    "austrailian-mango-plantation.jpg2426773"; use the first key instead,
    consistent with draw_annotations/resize_annotations, so the function
    works for any annotation file.
    """
    key = next(iter(annotations))
    regions = annotations[key]["regions"]
    sorted_regions = []
    for region in regions:
        shape = regions[region]["shape_attributes"]
        # lowest image coordinate touched by this circle
        sorted_regions.append((region, shape["cy"] + shape["r"]))
    sorted_regions.sort(key=lambda tup: tup[1])
    return sorted_regions[int(len(sorted_regions) * train_percent) - 1][1]
def train_test_split(img, mark, mode):
    """Split an image into two parts at pixel index `mark`.

    Parameters
    ----------
    img : ndarray
        Image to split.
    mark : int
        Row (mode 0) or column (mode 1) index at which to cut.
    mode : int
        0 = horizontal split (rows), 1 = vertical split (columns).

    Returns
    -------
    tuple of ndarray
        (first_part, second_part).

    Raises
    ------
    ValueError
        If mode is not 0 or 1 (the original silently returned None).
    """
    if mode == 0:
        return (img[:mark, :], img[mark:, :])
    if mode == 1:
        return (img[:, :mark], img[:, mark:])
    raise ValueError("mode must be 0 (horizontal) or 1 (vertical)")
# mark = split_mark(annotations, 0.7)
# (test, train) = train_test_split(img, mark, mode)
# mode = 1
# Hand-picked vertical split columns: test takes columns >= 675, train takes
# columns < 688, so the two halves overlap by 13 columns.
test_mark = 675
train_mark = 688
test = img[:, test_mark:]
train = img[:, :train_mark]
f = plt.figure(figsize=(18,9))
ax = f.add_subplot(121)
ax.set_xlabel("Train Image")
ax.imshow(cv2.cvtColor(train, cv2.COLOR_BGR2RGB))
ax2 = f.add_subplot(122)
ax2.set_xlabel("Test Image")
ax2.imshow(cv2.cvtColor(test, cv2.COLOR_BGR2RGB))
plt.show()
# Outline the train region (red box, BGR (255,0,0) shown as blue in RGB plots)
# and the test region on the annotated preview.
marked_img = annotated_img.copy()
img_row, img_col, img_spec = img.shape
cv2.rectangle(marked_img,(img_col-2,2),(train_mark,img_row-1),(255,0,0),4)
cv2.rectangle(marked_img,(2,2),(test_mark,img_row-1),(0,0,255),4)
f = plt.figure(figsize=(15,20))
ax = f.add_subplot(111)
ax.imshow(cv2.cvtColor(marked_img, cv2.COLOR_BGR2RGB))
plt.show()
# Split the annotated preview with the same column marks as the raw image.
# (annotated_test, annotated_train) = train_test_split(annotated_img, mark, mode)
annotated_test = annotated_img[:, test_mark:]
annotated_train = annotated_img[:, :train_mark]
f = plt.figure(figsize=(18,9))
ax = f.add_subplot(121)
# Fixed user-facing typo: "Annotaions" -> "Annotations".
ax.set_xlabel("Train Image With Annotations")
ax.imshow(cv2.cvtColor(annotated_train, cv2.COLOR_BGR2RGB))
ax2 = f.add_subplot(122)
ax2.set_xlabel("Test Image With Annotations")
ax2.imshow(cv2.cvtColor(annotated_test, cv2.COLOR_BGR2RGB))
plt.show()
Color, texture, and entropy are chosen as the features for pixel-level classification.
Color is undoubtedly the most revealing feature for trees (Yang et al., 2009). The RGB channels of each aerial image will be converted to CIE L*a*b* color space for a better perceptual uniformity, and to the illumination-invariant color space (Chong et al., 2008) for some robustness against the change of lighting condition. The two color representations will be concatenated to form a 6 dimensional feature vector at each pixel.
# CIE L*a*b* colour features: one 3-vector per pixel of the training image.
lab = cv2.cvtColor(train, cv2.COLOR_BGR2LAB)
lab_features = np.reshape(lab, (-1, 3))
pd.DataFrame(lab_features, columns=['L', 'a', 'b']).head()
| L | a | b | |
|---|---|---|---|
| 0 | 68 | 123 | 141 |
| 1 | 48 | 127 | 138 |
| 2 | 11 | 128 | 133 |
| 3 | 35 | 125 | 134 |
| 4 | 47 | 123 | 135 |
def bias_black(rgb_vec):
    """Replace pure-black pixels (sum of channels == 0) with (1, 0, 0).

    Mutates rgb_vec in place and returns it; keeps later log() calls from
    ever seeing a zero.
    """
    black_mask = np.sum(rgb_vec, axis=1) == 0
    rgb_vec[black_mask] = [1, 0, 0]
    return rgb_vec
def bgr2xyz_vec(bgr):
    """Convert a BGR image into flattened CIE XYZ column vectors (3 x N)."""
    # sRGB -> XYZ conversion matrix (D65 white point).
    xyz_matrix = np.array([
        [0.412453, 0.357580, 0.180423],
        [0.212671, 0.715160, 0.072169],
        [0.019334, 0.119193, 0.950227],
    ])
    pixels = np.reshape(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB), (-1, 3))
    pixels = bias_black(pixels)  # keep log() downstream away from zeros
    # Divide by 2.55 to map the 0..255 channel range onto 0..100.
    return np.true_divide(np.matmul(xyz_matrix, np.transpose(pixels)), 2.55)
def illumination_invariant_colorspace_features(bgr):
    """Per-pixel features in the illumination-invariant colour space of
    Chong et al. (2008): F = A . log(B . xyz), returned as (N, 3)."""
    A = np.array([
        [27.07439, -22.80783, -1.806681],
        [-5.646736, -7.722125, 12.86503],
        [-4.163133, -4.579428, -4.576049],
    ])
    B = np.array([
        [0.9465229, 0.2946927, -0.1313419],
        [-0.1179179, 0.9929960, 0.007371554],
        [0.09230461, -0.04645794, 0.9946464],
    ])
    xyz_vec = bgr2xyz_vec(bgr)
    return np.transpose(np.matmul(A, np.log(np.matmul(B, xyz_vec))))
# Illumination-invariant colour features for the training image (3 per pixel).
ii_features = illumination_invariant_colorspace_features(train)
pd.DataFrame(ii_features, columns=["Illumination Invariant 1", "Illumination Invariant 2", "Illumination Invariant 3"]).head()
| Illumination Invariant 1 | Illumination Invariant 2 | Illumination Invariant 3 | |
|---|---|---|---|
| 0 | 12.059618 | -3.265322 | -41.585699 |
| 1 | 11.925654 | -3.435717 | -37.243996 |
| 2 | 11.916584 | -13.377799 | -18.962393 |
| 3 | 10.547525 | -2.348117 | -33.989232 |
| 4 | 10.420751 | -2.137160 | -37.247458 |
The texture pattern formed by tree leaves often distinguishes trees from similarly colored objects (Yang et al., 2009) such as grass and bushes. The texture feature is generated as a set of filter responses at each pixel by convolving the L channel of each aerial image with a filter-bank. Gaussian derivative filters were empirically chosen to form the filter-bank. Each Gaussian derivative filter is a second derivative of a Gaussian in the X and Y directions. The filter-bank consists of filters on 3 scales (with σ = 1, √2, 2) and 6 orientations uniformly sampled in [0, π), which generates an 18 dimensional feature vector at each pixel.
def oriented_gaussian_derivative_filter2(image, sigma=1, theta=0, truncate=4.0):
    """Response of image to a second-derivative-of-Gaussian filter steered
    to orientation theta (radians), at scale sigma.

    The three basis responses (xx, xy, yy second derivatives) are combined
    with the steering weights cos²θ, sin²θ and -2 cosθ sinθ.
    """
    radius = int(truncate * sigma + 0.5)
    coords = np.arange(-radius, radius + 1)
    xx, yy = np.meshgrid(coords, coords)
    # Base Gaussian window over the (2*radius+1)² support.
    g0 = np.exp(-(xx**2 + yy**2) / (2 * sigma**2)) / (sigma * math.sqrt(2 * math.pi))
    # Second-derivative basis kernels.
    G2a = -g0 / sigma**2 + g0 * xx**2 / sigma**4
    G2b = g0 * xx * yy / sigma**4
    G2c = -g0 / sigma**2 + g0 * yy**2 / sigma**4
    responses = [convolve2d(image, kernel, mode='same', boundary='symm')
                 for kernel in (G2a, G2b, G2c)]
    I2a, I2b, I2c = responses
    return np.cos(theta)**2 * I2a + np.sin(theta)**2 * I2c - 2 * np.cos(theta) * np.sin(theta) * I2b
def get_texture_features(L):
    """Build the 18-D texture vector per pixel of the L channel:
    steered second-derivative Gaussian responses at 3 scales x 6
    orientations, returned as a (num_pixels, 18) array."""
    scales = [1, math.sqrt(2), 2]
    # 6 orientations evenly spaced over [0, pi); the endpoint pi is dropped.
    thetas = np.linspace(0, math.pi, num=7, endpoint=True)[0:-1]
    columns = [
        np.reshape(oriented_gaussian_derivative_filter2(L, sigma=s, theta=t), (-1, 1))
        for s in scales
        for t in thetas
    ]
    return np.concatenate(columns, axis=1)
# 18-D texture features from the training image's L channel.
texture_features = get_texture_features(lab[:, :, 0])
pd.DataFrame(texture_features, columns=["σ=1; θ=0", "σ=1; θ=π/6", "σ=1; θ=2π/6", "σ=1; θ=3π/6", "σ=1; θ=4π/6", "σ=1; θ=5π/6", "σ=√2; θ=0", "σ=√2; θ=π/6", "σ=√2; θ=2π/6", "σ=√2; θ=3π/6", "σ=√2; θ=4π/6", "σ=√2; θ=5π/6", "σ=2; θ=0", "σ=2; θ=π/6", "σ=2; θ=2π/6", "σ=2; θ=3π/6", "σ=2; θ=4π/6", "σ=2; θ=5π/6"]).head()
| σ=1; θ=0 | σ=1; θ=π/6 | σ=1; θ=2π/6 | σ=1; θ=3π/6 | σ=1; θ=4π/6 | σ=1; θ=5π/6 | σ=√2; θ=0 | σ=√2; θ=π/6 | σ=√2; θ=2π/6 | σ=√2; θ=3π/6 | σ=√2; θ=4π/6 | σ=√2; θ=5π/6 | σ=2; θ=0 | σ=2; θ=π/6 | σ=2; θ=2π/6 | σ=2; θ=3π/6 | σ=2; θ=4π/6 | σ=2; θ=5π/6 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | -33.710778 | -26.134480 | -2.538572 | 13.481038 | 5.904740 | -17.691168 | -25.872663 | -15.690432 | 8.683600 | 22.875400 | 12.693169 | -11.680863 | -15.294360 | -4.774398 | 17.012326 | 28.279089 | 17.759128 | -4.027597 |
| 1 | -6.352681 | -2.592485 | 14.208542 | 27.249373 | 23.489177 | 6.688150 | -5.679631 | 0.256321 | 17.836310 | 29.480347 | 23.544396 | 5.964406 | -7.671578 | 1.137458 | 19.707050 | 29.467607 | 20.658571 | 2.088978 |
| 2 | 39.217142 | 41.810600 | 36.907717 | 29.411377 | 26.817919 | 31.720801 | 17.269399 | 22.251104 | 29.829553 | 32.426297 | 27.444592 | 19.866143 | 1.990471 | 9.566094 | 23.311379 | 29.481041 | 21.905418 | 8.160133 |
| 3 | 21.256911 | 27.928841 | 23.603396 | 12.606023 | 5.934094 | 10.259538 | 18.632868 | 25.713984 | 28.989679 | 25.184258 | 18.103143 | 14.827448 | 7.809773 | 14.893384 | 23.946377 | 25.915759 | 18.832148 | 9.779155 |
| 4 | -8.447602 | -1.749808 | 0.420866 | -4.106253 | -10.804047 | -12.974721 | 5.359222 | 13.187514 | 16.361398 | 11.706989 | 3.878696 | 0.704813 | 8.910390 | 15.224366 | 20.036957 | 18.535572 | 12.221596 | 7.409005 |
Entropy measures the uncertainty of a random variable (Yang et al., 2009), which in this case is the L channel of the aerial images. This helps to differentiate between tree leaves and ground. The entropy of each pixel is computed within 5 x 5, 9 x 9, and 17 x 17 search windows on the L channel of the image. Concatenating the entropy values forms a 3 dimensional feature vector at each pixel.
def entropy(signal):
    """Return the Shannon entropy (bits) of a 1-D numpy array.

    Rewritten with np.unique(return_counts=True): one pass instead of the
    original's one full scan per distinct symbol (O(k*n) -> O(n log n)).

    Parameters
    ----------
    signal : ndarray
        1-D array of symbols.

    Returns
    -------
    float
        Entropy in bits; 0.0 for a constant signal.
    """
    counts = np.unique(signal, return_counts=True)[1]
    probabilities = counts / signal.size
    return np.sum(probabilities * np.log2(1.0 / probabilities))
def get_entropy_features(L):
    """Per-pixel local entropy of the L channel at three window sizes.

    Half-widths N = 2, 4, 8 give (2N+1)² windows of 5x5, 9x9 and 17x17,
    clamped at the image border. Returns a (num_pixels, 3) array.

    Bug fix: the original accumulated results in np.array(L), which keeps
    L's integer dtype (uint8 for an L channel) and silently truncated every
    entropy value to an integer — visible as whole numbers in the feature
    table. Accumulate in a float array instead.
    """
    half_widths = [2, 4, 8]
    row, col = L.shape
    E_features = None
    for N in half_widths:
        E = np.empty((row, col), dtype=float)
        for r in range(row):
            for c in range(col):
                # Clamp the search window to the image border.
                Lx = np.max([0, c - N])
                Ux = np.min([col, c + N + 1])
                Ly = np.max([0, r - N])
                Uy = np.min([row, r + N + 1])
                E[r, c] = entropy(L[Ly:Uy, Lx:Ux].flatten())
        column = np.reshape(E, (-1, 1))
        if E_features is not None:
            E_features = np.concatenate((E_features, column), axis=1)
        else:
            E_features = column
    return E_features
# 3-D entropy features from the training image's L channel.
entropy_features = get_entropy_features(lab[:, :, 0])
pd.DataFrame(entropy_features, columns=["search window = 5x5", "search window = 9x9", "search window = 17x17"]).head()
| search window = 5x5 | search window = 9x9 | search window = 17x17 | |
|---|---|---|---|
| 0 | 3 | 4 | 5 |
| 1 | 3 | 4 | 5 |
| 2 | 3 | 4 | 5 |
| 3 | 3 | 5 | 5 |
| 4 | 3 | 5 | 5 |
# Visualize the three entropy maps next to the original training image.
f = plt.figure(figsize=(15,7))
ax = f.add_subplot(141)
ax2 = f.add_subplot(142)
ax3 = f.add_subplot(143)
ax4 = f.add_subplot(144)
ax.imshow(cv2.cvtColor(train, cv2.COLOR_BGR2RGB))
ax.set_xlabel("Original Image")
ax2.imshow(np.reshape(entropy_features[:,0], (train.shape[0], train.shape[1])), cmap=plt.cm.jet)
ax2.set_xlabel("5x5 Search Window")
ax3.imshow(np.reshape(entropy_features[:,1], (train.shape[0], train.shape[1])), cmap=plt.cm.jet)
ax3.set_xlabel("9x9 Search Window")
ax4.imshow(np.reshape(entropy_features[:,2], (train.shape[0], train.shape[1])), cmap=plt.cm.jet)
ax4.set_xlabel("17x17 Search Window")
plt.show()
Finally, by concatenating color, texture and entropy features, a 27 dimensional feature vector is formed at each pixel.
# Concatenate colour (3+3), texture (18) and entropy (3) features into the
# 27-D per-pixel training matrix.
columns = ["L", "a", "b", "Illumination Invariant 1", "Illumination Invariant 2", "Illumination Invariant 3", "σ=1; θ=0", "σ=1; θ=π/6", "σ=1; θ=2π/6", "σ=1; θ=3π/6", "σ=1; θ=4π/6", "σ=1; θ=5π/6", "σ=√2; θ=0", "σ=√2; θ=π/6", "σ=√2; θ=2π/6", "σ=√2; θ=3π/6", "σ=√2; θ=4π/6", "σ=√2; θ=5π/6", "σ=2; θ=0", "σ=2; θ=π/6", "σ=2; θ=2π/6", "σ=2; θ=3π/6", "σ=2; θ=4π/6", "σ=2; θ=5π/6", "search window = 5x5", "search window = 9x9", "search window = 17x17"]
train_data = np.concatenate((lab_features, ii_features, texture_features, entropy_features), axis=1)
train_data = pd.DataFrame(train_data, columns=columns)
train_data.head()
| L | a | b | Illumination Invariant 1 | Illumination Invariant 2 | Illumination Invariant 3 | σ=1; θ=0 | σ=1; θ=π/6 | σ=1; θ=2π/6 | σ=1; θ=3π/6 | ... | σ=√2; θ=5π/6 | σ=2; θ=0 | σ=2; θ=π/6 | σ=2; θ=2π/6 | σ=2; θ=3π/6 | σ=2; θ=4π/6 | σ=2; θ=5π/6 | search window = 5x5 | search window = 9x9 | search window = 17x17 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 68.0 | 123.0 | 141.0 | 12.059618 | -3.265322 | -41.585699 | -33.710778 | -26.134480 | -2.538572 | 13.481038 | ... | -11.680863 | -15.294360 | -4.774398 | 17.012326 | 28.279089 | 17.759128 | -4.027597 | 3.0 | 4.0 | 5.0 |
| 1 | 48.0 | 127.0 | 138.0 | 11.925654 | -3.435717 | -37.243996 | -6.352681 | -2.592485 | 14.208542 | 27.249373 | ... | 5.964406 | -7.671578 | 1.137458 | 19.707050 | 29.467607 | 20.658571 | 2.088978 | 3.0 | 4.0 | 5.0 |
| 2 | 11.0 | 128.0 | 133.0 | 11.916584 | -13.377799 | -18.962393 | 39.217142 | 41.810600 | 36.907717 | 29.411377 | ... | 19.866143 | 1.990471 | 9.566094 | 23.311379 | 29.481041 | 21.905418 | 8.160133 | 3.0 | 4.0 | 5.0 |
| 3 | 35.0 | 125.0 | 134.0 | 10.547525 | -2.348117 | -33.989232 | 21.256911 | 27.928841 | 23.603396 | 12.606023 | ... | 14.827448 | 7.809773 | 14.893384 | 23.946377 | 25.915759 | 18.832148 | 9.779155 | 3.0 | 5.0 | 5.0 |
| 4 | 47.0 | 123.0 | 135.0 | 10.420751 | -2.137160 | -37.247458 | -8.447602 | -1.749808 | 0.420866 | -4.106253 | ... | 0.704813 | 8.910390 | 15.224366 | 20.036957 | 18.535572 | 12.221596 | 7.409005 | 3.0 | 5.0 | 5.0 |
5 rows × 27 columns
def is_pixel_tree(row):
    """Return 1 when the first three channels are all zero — the pixel was
    painted black by a filled annotation circle, i.e. ground-truth tree —
    otherwise 0."""
    if row[0] or row[1] or row[2]:
        return 0
    return 1
# Build ground-truth labels: paint the annotation discs solid black
# (outline_width -1 = filled), crop to the train columns, then label a pixel
# as tree (1) iff it is pure black in the painted image.
# NOTE: this rebinds annotated_img, replacing the earlier outlined preview.
annotated_img = draw_annotations(img.copy(), annotations, (0,0,0), -1)
annotated_img = annotated_img[:, :train_mark]
matrix = pd.DataFrame(np.reshape(annotated_img, (-1, 3)))
train_data["Tree"] = matrix.apply(is_pixel_tree, axis=1)
train_data.head()
| L | a | b | Illumination Invariant 1 | Illumination Invariant 2 | Illumination Invariant 3 | σ=1; θ=0 | σ=1; θ=π/6 | σ=1; θ=2π/6 | σ=1; θ=3π/6 | ... | σ=2; θ=0 | σ=2; θ=π/6 | σ=2; θ=2π/6 | σ=2; θ=3π/6 | σ=2; θ=4π/6 | σ=2; θ=5π/6 | search window = 5x5 | search window = 9x9 | search window = 17x17 | Tree | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 68.0 | 123.0 | 141.0 | 12.059618 | -3.265322 | -41.585699 | -33.710778 | -26.134480 | -2.538572 | 13.481038 | ... | -15.294360 | -4.774398 | 17.012326 | 28.279089 | 17.759128 | -4.027597 | 3.0 | 4.0 | 5.0 | 0 |
| 1 | 48.0 | 127.0 | 138.0 | 11.925654 | -3.435717 | -37.243996 | -6.352681 | -2.592485 | 14.208542 | 27.249373 | ... | -7.671578 | 1.137458 | 19.707050 | 29.467607 | 20.658571 | 2.088978 | 3.0 | 4.0 | 5.0 | 0 |
| 2 | 11.0 | 128.0 | 133.0 | 11.916584 | -13.377799 | -18.962393 | 39.217142 | 41.810600 | 36.907717 | 29.411377 | ... | 1.990471 | 9.566094 | 23.311379 | 29.481041 | 21.905418 | 8.160133 | 3.0 | 4.0 | 5.0 | 0 |
| 3 | 35.0 | 125.0 | 134.0 | 10.547525 | -2.348117 | -33.989232 | 21.256911 | 27.928841 | 23.603396 | 12.606023 | ... | 7.809773 | 14.893384 | 23.946377 | 25.915759 | 18.832148 | 9.779155 | 3.0 | 5.0 | 5.0 | 0 |
| 4 | 47.0 | 123.0 | 135.0 | 10.420751 | -2.137160 | -37.247458 | -8.447602 | -1.749808 | 0.420866 | -4.106253 | ... | 8.910390 | 15.224366 | 20.036957 | 18.535572 | 12.221596 | 7.409005 | 3.0 | 5.0 | 5.0 | 0 |
5 rows × 28 columns
# Train AdaBoost on the 27-D pixel features and evaluate on the training set.
X_train = train_data.loc[:, train_data.columns != "Tree"]
y_train = train_data["Tree"]
abc = AdaBoostClassifier(n_estimators=200, learning_rate=1)
model = abc.fit(X_train, y_train)
y_pred = model.predict(X_train)
# Per-feature importance bar chart; bars/y_pos insert gaps between groups so
# the group labels (L*a*b*, Illum-inv, Texture, Entropy) sit under their bars.
height = model.feature_importances_
bars = ["", "L*a*b*", "", "", "", "Illum-inv", "", "", "", "", "", "", "", "", "Texture", "", "", "", "", "", "", "", "", "", "", "Entropy", ""]
y_pos = [0,1,2,5,6,7,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,30,31,32]
plt.bar(y_pos, height)
plt.title('Adaboost: feature importance')
plt.xlabel('Feature')
plt.ylabel('Importance')
plt.xticks(y_pos, bars)
# plt.show()
plt.savefig('../misc/paper/second config/adaboost_feature_importance.png', )
# Group importances: features 0-2 = L*a*b*, 3-5 = illumination-invariant,
# 6-23 = texture, 24-26 = entropy.
height = model.feature_importances_
height_lab = np.sum(height[:3])
height_ii = np.sum(height[3:6])
height_texture = np.sum(height[6:24])
height_entropy = np.sum(height[24:])
height = [height_lab, height_ii, height_texture, height_entropy]
bars= ["L*a*b", "Illum-inv", "Texture", "Entropy"]
y_pos = [0,3,6,9]
plt.bar(y_pos, height)
plt.title('Adaboost: feature group importance')
plt.xlabel('Feature Group')
plt.ylabel('Importance')
plt.xticks(y_pos, bars)
# plt.show()
plt.savefig('../misc/paper/second config/adaboost_feature_group_importance.png')
# Training-set accuracy (optimistic; test-set accuracy is computed later).
print("Accuracy:",metrics.accuracy_score(y_train, y_pred))
Accuracy: 0.9255943152454781
# Turn the 0/1 predictions into a 0/255 mask, replicate it across the three
# colour channels, and mask out non-tree pixels of the training image.
y_pred_copy = y_pred.copy()
y_pred_copy[y_pred_copy==1] = 255
px_level = train.copy()
row, col, spec = train.shape
px_level = pd.DataFrame(np.reshape(px_level, (-1, 3)))
y_pred_copy = pd.DataFrame(y_pred_copy)
y_pred_copy[1] = y_pred_copy[0]
y_pred_copy[2] = y_pred_copy[1]
# Keep pixel values where the mask is 255, zero elsewhere.
px_level = px_level.where(y_pred_copy == 255, 0)
px_level = np.reshape(px_level.values, (row, col, spec))
y_pred_copy = np.reshape(y_pred_copy[0].values, (row, col))
f = plt.figure(figsize=(34,18))
ax = f.add_subplot(131)
ax.set_xlabel("Training")
ax.imshow(cv2.cvtColor(train, cv2.COLOR_BGR2RGB))
ax2 = f.add_subplot(132)
ax2.set_xlabel("Mask")
ax2.imshow(1-y_pred_copy, cmap="Greys")
ax3 = f.add_subplot(133)
ax3.set_xlabel("Pixel Level Classification")
ax3.imshow(cv2.cvtColor(px_level, cv2.COLOR_BGR2RGB))
plt.show()
def V_p_q(label1, label2):
    """Pairwise smoothness cost between two labels: 0 if equal, otherwise
    their absolute difference (1 for the binary labels used here)."""
    return max(label1, label2) - min(label1, label2)
def D_p(label, proba, x, y):
    """Data cost of assigning `label` at pixel (x, y), given the per-pixel
    tree probability map proba (indexed proba[y][x])."""
    p = proba[y][x]
    return math.log(1 - p) if label == 1 else math.log(p)
def give_neighbours(image, x, y):
    """Return the values of the 4-connected neighbours of pixel (x, y).

    Raises ValueError when (x, y) lies outside the image.
    """
    height = len(image)
    width = len(image[0])
    if not (0 <= x < width and 0 <= y < height):
        raise ValueError('Pixel is not in image. x and/or y are to large')
    offsets = ((1, 0), (0, 1), (-1, 0), (0, -1))  # right, down, left, up
    return [image[y + dy][x + dx]
            for dx, dy in offsets
            if 0 <= x + dx < width and 0 <= y + dy < height]
def return_mapping_of_image(image, alpha, beta):
    """Number the image's pixels row-major and return both directions of the
    mapping: (index -> (y, x), (y, x) -> index).

    alpha and beta are accepted for interface compatibility but unused here.
    """
    coords = [(y, x)
              for y in range(len(image))
              for x in range(len(image[0]))]
    forward = dict(enumerate(coords))
    backward = {yx: idx for idx, yx in enumerate(coords)}
    return forward, backward
def alpha_beta_swap_new(alpha, beta, tree, tree_proba):
    """One alpha-beta swap move over the whole binary label image `tree`,
    solved exactly via max-flow/min-cut (PyMaxflow).

    tree: 2-D label image (values alpha/beta), relabelled in place and returned.
    tree_proba: per-pixel tree probability, indexed [y][x], used for the
    data term D_p; smoothness term is V_p_q over the 4-neighbourhood.
    """
    #extract position of alpha or beta pixels to mapping
    map, revmap = return_mapping_of_image(tree, alpha, beta)
    #graph of maxflow
    graph_mf = mf.Graph[float](len(map) )
    #add nodes
    nodes = graph_mf.add_nodes(len(map))
    #add n-link edges
    weight = 1
    for i in range(0,len(map)):
        y,x = map[i]
        #top, left, bottom, right
        for a,b in zip([1,0,-1,0],[0,1,0,-1]):
            if (y+b, x+a) in revmap:
                # capacity `weight` one way, 0 the reverse; the opposite
                # direction is added when the neighbour's own loop runs.
                graph_mf.add_edge(i,revmap[(y+b,x+a)], weight, 0)
    #add all the terminal edges
    for i in range(0,len(map)):
        y,x = map[i]
        #find neighbours
        neighbours = give_neighbours(tree, x, y)
        #calculation of weight: smoothness toward current neighbour labels
        #plus the data cost of taking alpha (resp. beta) at this pixel.
        t_weight_alpha = sum([V_p_q(alpha,v) for v in neighbours]) + D_p(alpha, tree_proba, x, y)
        t_weight_beta = sum([V_p_q(beta,v) for v in neighbours]) + D_p(beta, tree_proba, x, y)
        graph_mf.add_tedge(nodes[i], t_weight_alpha, t_weight_beta)
    #calculating flow
    flow = graph_mf.maxflow()
    res = [graph_mf.get_segment(nodes[i]) for i in range(0, len(nodes))]
    #depending on cut assign new label
    for i in range(0, len(res)):
        y, x = map[i]
        if res[i] == 1:
            tree[y][x] = alpha
        else:
            tree[y][x] = beta
    return tree
def swap_minimization(tree, tree_proba, cycles):
    """Refine the binary label image by running `cycles` rounds of the
    alpha-beta swap move (labels 0 and 1) and return the result."""
    for _ in range(cycles):
        tree = alpha_beta_swap_new(0, 1, tree, tree_proba)
    return tree
# Graph-cut refinement of the training predictions: feed the per-pixel tree
# probabilities and hard labels into the alpha-beta swap minimization.
y_pred_proba = model.predict_proba(X_train)[:,1]
tree_proba = np.reshape(y_pred_proba, (row, col))
tree = np.reshape(y_pred, (row, col))
refined_class = swap_minimization(tree, tree_proba, 10)
# Build a 0/255 mask from the refined labels and apply it to the image,
# mirroring the earlier pixel-level visualization.
refined_mask = refined_class.copy()
refined_mask = np.reshape(refined_mask, (-1,1))
refined_mask[refined_mask==1] = 255
refined = train.copy()
refined = pd.DataFrame(np.reshape(refined, (-1, 3)))
refined_mask = pd.DataFrame(refined_mask)
refined_mask[1] = refined_mask[0]
refined_mask[2] = refined_mask[1]
refined = refined.where(refined_mask == 255, 0)
refined = np.reshape(refined.values, (row, col, spec))
refined_mask = np.reshape(refined_mask[0].values, (row, col))
f = plt.figure(figsize=(25,15))
ax = f.add_subplot(151)
ax.set_xlabel("Training")
ax.imshow(cv2.cvtColor(train, cv2.COLOR_BGR2RGB))
ax2 = f.add_subplot(152)
ax2.set_xlabel("Mask")
ax2.imshow(1-y_pred_copy, cmap="Greys")
ax3 = f.add_subplot(153)
ax3.set_xlabel("Pixel Level Classification")
ax3.imshow(cv2.cvtColor(px_level, cv2.COLOR_BGR2RGB))
ax4 = f.add_subplot(154)
ax4.set_xlabel("Mask (Refined)")
ax4.imshow(1-refined_mask, cmap="Greys")
ax5 = f.add_subplot(155)
ax5.set_xlabel("Refined Classification")
ax5.imshow(cv2.cvtColor(refined, cv2.COLOR_BGR2RGB))
plt.show()
# Collect annotated circles that lie fully inside the train split as
# (upper, lower, left, right, cx, cy, r) tuples.
regions = annotations["austrailian-mango-plantation.jpg2426773"]["regions"]
training_regions = []
for region in regions:
    radius = regions[region]["shape_attributes"]["r"]
    cx = regions[region]["shape_attributes"]["cx"]
    cy = regions[region]["shape_attributes"]["cy"]
    upperbound = cy - radius
    lowerbound = cy + radius
    leftbound = cx - radius
    rightbound = cx + radius
    training_regions.append((upperbound, lowerbound, leftbound, rightbound, cx, cy, radius))
# Sort by left bound, then keep only circles entirely within the image rows
# and left of the train mark.
training_regions.sort(key=lambda tup: tup[2])
img_row, img_col, img_spec = img.shape
training_regions = [r for r in training_regions if (r[0] >= 0 and r[1] < img_row and r[2] >= 0 and r[3] < train_mark)]
training_regions = np.array([list(region) for region in training_regions])
radius = training_regions[:, 6]
print("min: {}, max: {}".format(radius.min(), radius.max()))
print(sorted(radius))
print("num of ground truth: {}".format(len(radius)))
min: 6, max: 27 [6, 7, 8, 8, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 16, 16, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27] num of ground truth: 108
# Histogram of ground-truth tree radii to pick template radius bins.
f = plt.figure(figsize=(7,7))
ax = f.add_subplot(111)
ax.hist(sorted(radius.astype('int')))
(array([ 6., 4., 8., 7., 5., 10., 11., 19., 23., 15.]), array([ 6. , 8.1, 10.2, 12.3, 14.4, 16.5, 18.6, 20.7, 22.8, 24.9, 27. ]), <a list of 10 Patch objects>)
def round_off_radius(regions, ro_radiuses, minmax):
    """Snap each region's radius (index 2) to the first template radius ro in
    ro_radiuses satisfying ro - minmax[0] <= r <= ro + minmax[1].

    Radii matching no bin are left unchanged. Returns a deep copy; the input
    is not modified.
    """
    ro_regions = copy.deepcopy(regions)
    for idx, region in enumerate(regions):
        radius = region[2]
        snapped = next(
            (ro for ro in ro_radiuses
             if ro - minmax[0] <= radius <= ro + minmax[1]),
            radius,
        )
        ro_regions[idx][2] = snapped
    return ro_regions
# Snap radii to the template bins (7, 10, ..., 25), sort by radius, and drop
# the two largest outliers.
ro_regions = round_off_radius(training_regions[:,4:], (7, 10, 13, 16, 19, 22, 25), (1, 1))
ro_regions = ro_regions[ro_regions[:,2].argsort()]
ro_regions = ro_regions[:-2]
ro_radius = ro_regions[:, 2]
print("min: {}, max: {}".format(ro_radius.min(), ro_radius.max()))
print(sorted(ro_radius))
print("num of ground truth: {}".format(len(ro_radius)))
min: 7, max: 25 [7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10, 10, 10, 10, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 16, 16, 16, 16, 16, 16, 16, 16, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25] num of ground truth: 106
def get_portion(img, cx, cy, r):
    """Return the square crop of side 2r+1 centred on (cx, cy).

    Removed the unused `row, col, spec = img.shape` unpack, which also lets
    the function accept 2-D arrays. The crop is clipped by numpy slicing at
    the bottom/right edges; callers pre-filter regions so the circle fits.
    NOTE(review): a centre closer than r to the top/left edge would produce
    a negative slice start and an unexpected crop — confirm callers avoid it.
    """
    top = cy - r
    bottom = cy + r + 1
    left = cx - r
    right = cx + r + 1
    return img[top:bottom, left:right]
def scale_region(region):
    """Return a float64 copy of `region` with values scaled from 0..255 to 0..1.

    Vectorized: the original divided pixel vectors one at a time inside a
    Python double loop; a single array divide computes identical values.
    """
    return region.astype('float64') / 255
def is_pixel_P(row):
    """Return 1 when the pixel is pure black (all of the first three channels
    zero), else 0. Used to mark the template's interior-disc support mask.

    NOTE(review): duplicates is_pixel_tree — the two could share one helper.
    """
    return int(row[0] == 0 and row[1] == 0 and row[2] == 0)
def add_p(radius, scaled_average):
    """Append a 4th channel to the averaged template marking pixels inside
    the disc of `radius` (the support mask used during template matching).

    Returns a (row, col, spec+1) array.
    """
    row, col, spec = scaled_average.shape
    sa_copy = scaled_average.copy()
    # Paint the disc solid black in the copy; pixels that end up (0,0,0)
    # are treated as inside the mask by is_pixel_P below.
    # NOTE(review): the centre is (row/2, col/2) but cv2 expects (x, y) =
    # (col/2, row/2); harmless only because templates are square — confirm.
    cv2.circle(sa_copy, (int(row/2),int(col/2)), radius, 0, -1)
    scaled_average = pd.DataFrame(np.reshape(scaled_average, (-1, 3)))
    sa_copy = pd.DataFrame(np.reshape(sa_copy, (-1, 3)))
    # The column label 4 is arbitrary; positionally it becomes channel 3
    # after the reshape below.
    scaled_average[4] = sa_copy.apply(is_pixel_P, axis=1)
    return np.reshape(scaled_average.values, (row, col, spec+1))
def create_templates(regions):
    """Build one averaged tree template per rounded radius.

    regions: array of (cx, cy, r) rows. Crops each tree from the module-level
    `img`, groups the crops by radius, averages the equally-sized crops in
    each group (scaled to 0..1), and appends the disc support mask channel.
    Returns a list of (radius, template) tuples.
    """
    templates = {}
    scaled_templates = []
    # Group the per-tree crops by their rounded radius.
    for region in regions:
        cx = region[0]
        cy = region[1]
        r = region[2]
        tree_portion = get_portion(img.copy(), cx, cy, r)
        if r not in templates:
            templates[r] = [tree_portion]
        else:
            templates[r].append(tree_portion)
    # Average each group. Crops clipped at the image border are smaller than
    # (2r+1, 2r+1); the shape comparison below skips them once a full-size
    # crop has been seen, restarting the average on the first larger crop.
    for template in templates:
        curr_template = templates[template]
        scaled_average = None
        num_of_regions = 0
        for region in curr_template:
            if scaled_average is None or scaled_average.shape < region.shape:
                scaled_average = scale_region(region)
                num_of_regions = 1
            elif scaled_average.shape == region.shape:
                scaled_regions = scale_region(region)
                scaled_average = np.add(scaled_average, scaled_regions)
                num_of_regions += 1
        scaled_average = np.true_divide(scaled_average, num_of_regions)
        # Append the circular support mask as a 4th channel.
        scaled_average = add_p(template, scaled_average)
        scaled_templates.append((template, scaled_average))
    return scaled_templates
# Build the templates and the 5-channel search target for template matching:
# channels 0-2 = scaled BGR, 3 = P(tree), 4 = refined 0/1 label.
templates = create_templates(ro_regions)
row, col, spec = train.shape
target = scale_region(train)
target = np.reshape(target, (-1,3)).astype('float64')
target = np.concatenate((target, np.reshape(y_pred_proba, (-1, 1))), axis=1)
refined_pred = np.reshape(refined_class, (-1,3))
target = np.concatenate((target, np.reshape(refined_pred, (-1, 1))), axis=1)
target = np.reshape(target, (row, col, spec+2))
def tm_ccorr_normed(t, s):
    """Masked, normalized cross-correlation of template t against a window
    of the search image.

    t: (rows, cols, >=4) template; channel 3 is the support mask — only
    pixels with t[i][j][3] == 1 contribute.
    s: tuple (search_image, sx, sy, r); the window is the square of side
    2r+1 centred at (sx, sy).
    Returns the correlation scores of channels 0-3 averaged into one float.
    """
    search_img, sx, sy, r = s
    window = search_img[sy - r:sy + 1 + r, sx - r:sx + 1 + r]
    rows, cols, _ = t.shape
    # Cross-correlation numerator per channel.
    score = np.array([0, 0, 0, 0])
    for i in range(rows):
        for j in range(cols):
            if t[i][j][3] == 1:
                score = np.add(score, np.multiply(t[i][j][0:4], window[i][j][0:4]))
    # Energies of template and window over the same masked support.
    t_energy = np.array([0, 0, 0, 0])
    s_energy = np.array([0, 0, 0, 0])
    for i in range(rows):
        for j in range(cols):
            if t[i][j][3] == 1:
                t_energy = np.add(t_energy, np.power(t[i][j][0:4], 2))
                s_energy = np.add(s_energy, np.power(window[i][j][0:4], 2))
    energy = np.sqrt(np.multiply(t_energy, s_energy))
    # print("{}, ({},{}), {}, {}".format(r, sx, sy, i_score, score))
    return np.average(np.true_divide(score, energy))
def within_constraint(img, constraint):
    """Return True when at least `constraint` fraction of the circular
    window is labelled tree.

    img: tuple (image, x, y, r); the image's channel 4 holds the refined
    0/1 tree labels. The window is the square of side 2r+1 centred (x, y),
    restricted to the inscribed disc rasterized by cv2.circle.

    Perf: replaced the Python double loop over the mask with numpy counting;
    the counts are identical.
    """
    img, x, y, r = img
    window = img[y - r:y + 1 + r, x - r:x + 1 + r]
    rows, cols = window.shape[:2]
    c_mask = np.zeros((rows, cols))
    cv2.circle(c_mask, (r, r), r, 1, -1)
    inside = c_mask == 1
    total_area = np.count_nonzero(inside)
    within_area = np.count_nonzero(window[:, :, 4][inside] == 1)
    return within_area / total_area >= constraint
def match_templates(templates, img, thresh):
    """Slide every (radius, template) pair over the 5-channel target image
    with a stride of r//2 and collect candidate detections.

    Returns one list per template of (score, sx, sy, r) tuples whose masked
    cross-correlation score reached `thresh`.
    """
    candidates = []
    for template in templates:
        r = template[0]
        t = template[1]
        srow, scol, sspec = img.shape
        # Start with the window fully inside the image.
        sx = r
        sy = r
        candidate_set = []
        while True:
            score = None
            # Only score windows whose disc is mostly refined-classified tree.
            if within_constraint((img,sx,sy,r), 0.75):
                score = tm_ccorr_normed(t, (img,sx,sy,r))
            if score and score >= thresh:
                candidate_set.append((score, sx, sy, r))
            # Stop once the window has reached the bottom-right corner.
            if sx == scol-1-r and sy == srow-1-r:
                break
            if sx == scol-1-r:
                # End of a row: wrap to the left edge and step down r//2.
                sx = r
                sy = np.min([sy+(r//2), srow-1-r])
            else:
                sx = np.min([sx+(r//2), scol-1-r])
        candidates.append(candidate_set)
    return candidates
def templates_dict_to_list_mapping(templates):
    """Return the (radius, template) pairs as a new list of 2-tuples."""
    return [(entry[0], entry[1]) for entry in templates]
# Sort the templates largest-radius first so big trees are matched before
# small ones, then run template matching on the training target.
templates = templates_dict_to_list_mapping(templates)
templates.sort(key=itemgetter(0), reverse=True)
matches = match_templates(templates, target, 0.25)
def distance(a, b):
    """Euclidean distance between the 2-D points a and b ((x, y) pairs)."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return math.sqrt((dx ** 2) + (dy ** 2))
def remove_overlap(tm, thresh):
    """Greedy non-maximum suppression over the per-template candidate lists.

    Each inner list is sorted by score descending (in place, as before),
    then candidates are flattened in template order. Repeatedly keep the
    front candidate and discard every remaining one whose circle-overlap
    measure with it exceeds `thresh`. Returns the kept (score, x, y, r) list.
    """
    pool = []
    for candidate_set in tm:
        candidate_set.sort(key=itemgetter(0), reverse=True)
        pool.extend(candidate_set)
    kept = []
    while pool:
        best = pool.pop(0)
        kept.append(best)
        survivors = []
        for cand in pool:
            centre_gap = distance((best[1], best[2]), (cand[1], cand[2]))
            # Overlap relative to the smaller circle's radius.
            overlap = (best[3] + cand[3] - centre_gap) / np.min([best[3], cand[3]])
            if overlap <= thresh:
                survivors.append(cand)
        pool = survivors
    return kept
# Suppress overlapping detections across all template sizes.
final_matches = remove_overlap(matches, 0.25)
def draw_matches(img, matches, color, outline_width):
    """Draw each match (score, cx, cy, r) as a circle on img and return it."""
    for match in matches:
        centre = (int(match[1]), int(match[2]))
        cv2.circle(img, centre, int(match[3]), color, outline_width)
    return img
# Show detections vs. the annotated ground truth and the refined mask image.
matched_img = draw_matches(train.copy(), final_matches, (226,230,209), 4)
f = plt.figure(figsize=(15,20))
ax = f.add_subplot(131)
ax.imshow(cv2.cvtColor(matched_img, cv2.COLOR_BGR2RGB))
ax2 = f.add_subplot(132)
ax2.imshow(cv2.cvtColor(annotated_train, cv2.COLOR_BGR2RGB))
ax3 = f.add_subplot(133)
ax3.imshow(cv2.cvtColor(refined, cv2.COLOR_BGR2RGB))
plt.show()
# Extract the same 27-D features for the test split and score the model.
test_lab = cv2.cvtColor(test, cv2.COLOR_BGR2LAB)
test_lab_features = np.reshape(test_lab, (-1, 3))
test_ii_features = illumination_invariant_colorspace_features(test)
test_texture_features = get_texture_features(test_lab[:, :, 0])
test_entropy_features = get_entropy_features(test_lab[:, :, 0])
test_data = np.concatenate((test_lab_features, test_ii_features, test_texture_features, test_entropy_features), axis=1)
test_y_pred = model.predict(test_data)
# Ground truth for the test split: filled black annotation discs, cropped
# to the test columns.
test_annotated_img = draw_annotations(img.copy(), annotations, (0,0,0), -1)
test_annotated_img = test_annotated_img[:, test_mark:]
test_matrix = pd.DataFrame(np.reshape(test_annotated_img, (-1, 3)))
test_y_ground = test_matrix.apply(is_pixel_tree, axis=1)
print("Accuracy:",metrics.accuracy_score(test_y_ground, test_y_pred))
Accuracy: 0.9372232804232804
# Visualize the test-split pixel classification (same masking recipe as for
# the training split) and save the mask images for the paper.
test_y_pred_copy = test_y_pred.copy()
test_y_pred_copy[test_y_pred_copy==1] = 255
test_px_level = test.copy()
test_row, test_col, test_spec = test.shape
test_px_level = pd.DataFrame(np.reshape(test_px_level, (-1, 3)))
test_y_pred_copy = pd.DataFrame(test_y_pred_copy)
test_y_pred_copy[1] = test_y_pred_copy[0]
test_y_pred_copy[2] = test_y_pred_copy[1]
test_px_level = test_px_level.where(test_y_pred_copy == 255, 0)
test_px_level = np.reshape(test_px_level.values, (test_row, test_col, test_spec))
test_y_pred_copy = np.reshape(test_y_pred_copy[0].values, (test_row, test_col))
f = plt.figure(figsize=(34,18))
ax = f.add_subplot(131)
ax.set_xlabel("Test")
ax.imshow(cv2.cvtColor(test, cv2.COLOR_BGR2RGB))
ax2 = f.add_subplot(132)
ax2.set_xlabel("Mask")
ax2.imshow(1-test_y_pred_copy, cmap="Greys")
ax3 = f.add_subplot(133)
ax3.set_xlabel("Pixel Level Classification")
ax3.imshow(cv2.cvtColor(test_px_level, cv2.COLOR_BGR2RGB))
plt.show()
cv2.imwrite("../misc/paper/second config/px_mask.png", test_y_pred_copy)
cv2.imwrite("../misc/paper/second config/px_mask_app.png", test_px_level)
True
# Graph-cut refinement for the test split, mirroring the training pipeline,
# then visualize and save the refined masks.
test_y_pred_proba = model.predict_proba(test_data)[:,1]
test_tree_proba = np.reshape(test_y_pred_proba, (test_row, test_col))
test_tree = np.reshape(test_y_pred, (test_row, test_col))
test_refined_class = swap_minimization(test_tree, test_tree_proba, 10)
test_refined_mask = test_refined_class.copy()
test_refined_mask = np.reshape(test_refined_mask, (-1,1))
test_refined_mask[test_refined_mask==1] = 255
test_refined = test.copy()
test_refined = pd.DataFrame(np.reshape(test_refined, (-1, 3)))
test_refined_mask = pd.DataFrame(test_refined_mask)
test_refined_mask[1] = test_refined_mask[0]
test_refined_mask[2] = test_refined_mask[1]
test_refined = test_refined.where(test_refined_mask == 255, 0)
test_refined = np.reshape(test_refined.values, (test_row, test_col, test_spec))
test_refined_mask = np.reshape(test_refined_mask[0].values, (test_row, test_col))
f = plt.figure(figsize=(15,20))
ax = f.add_subplot(321)
ax.set_xlabel("Test")
ax.imshow(cv2.cvtColor(test, cv2.COLOR_BGR2RGB))
ax2 = f.add_subplot(323)
ax2.set_xlabel("Mask")
ax2.imshow(1-test_y_pred_copy, cmap="Greys")
ax3 = f.add_subplot(324)
ax3.set_xlabel("Pixel Level Classification")
ax3.imshow(cv2.cvtColor(test_px_level, cv2.COLOR_BGR2RGB))
ax4 = f.add_subplot(325)
ax4.set_xlabel("Mask (Refined)")
ax4.imshow(1-test_refined_mask, cmap="Greys")
ax5 = f.add_subplot(326)
ax5.set_xlabel("Refined Classification")
ax5.imshow(cv2.cvtColor(test_refined, cv2.COLOR_BGR2RGB))
plt.show()
cv2.imwrite('../misc/paper/second config/refined_mask.png', test_refined_mask)
cv2.imwrite('../misc/paper/second config/refined_mask_app.png', test_refined)
True
# Build the 5-channel test target, run template matching + NMS with the
# templates learned from the training split, and compare against the
# annotations and the refined classification.
test_row, test_col, test_spec = test.shape
test_target = scale_region(test)
test_target = np.reshape(test_target, (-1,3)).astype('float64')
test_target = np.concatenate((test_target, np.reshape(test_y_pred_proba, (-1, 1))), axis=1)
test_refined_pred = np.reshape(test_refined_class, (-1,3))
test_target = np.concatenate((test_target, np.reshape(test_refined_pred, (-1, 1))), axis=1)
test_target = np.reshape(test_target, (test_row, test_col, test_spec+2))
# templates is already a list of (radius, template) pairs; this is a no-op
# re-mapping followed by the same largest-first sort.
templates = templates_dict_to_list_mapping(templates)
templates.sort(key=itemgetter(0), reverse=True)
test_matches = match_templates(templates, test_target, 0.25)
test_final_matches = remove_overlap(test_matches, 0.25)
test_matched_img = draw_matches(test.copy(), test_final_matches, (226,230,209), 4)
f = plt.figure(figsize=(15,15))
ax = f.add_subplot(221)
ax.imshow(cv2.cvtColor(test_matched_img, cv2.COLOR_BGR2RGB))
ax2 = f.add_subplot(222)
ax2.imshow(cv2.cvtColor(annotated_test, cv2.COLOR_BGR2RGB))
ax3 = f.add_subplot(224)
ax3.imshow(cv2.cvtColor(test_refined, cv2.COLOR_BGR2RGB))
plt.show()
class Region:
    """A circular region (candidate or ground-truth tree) with overlap links.

    Each entry in ``relations`` is a tuple
    ``(other, size_comparison, fit, reverse_fit)`` where ``fit`` and
    ``reverse_fit`` are the intersection fractions of this region and of
    ``other`` respectively.
    """

    def __init__(self, x, y, r):
        self.x = x
        self.y = y
        self.r = r
        self.relations = []

    def __repr__(self):
        return "{{x: {}; y: {}; r: {};\nrelations:\n{}}}".format(
            self.x, self.y, self.r, self.relations_stringified())

    def add_relation(self, region, size_comparison, fit, reverse_fit):
        """Record an overlap relation with another region."""
        self.relations.append((region, size_comparison, fit, reverse_fit))

    def relations_stringified(self):
        """Render all relations, one bracketed line each."""
        rendered = [
            "[x: {}; y: {}; r: {}; sc: {}; fit: {}; rfit: {}]\n".format(
                other.x, other.y, other.r, sc, fit, rfit)
            for other, sc, fit, rfit in self.relations
        ]
        return "".join(rendered)
def convert_to_regions(regions):
    """Wrap an iterable of (x, y, r) rows into Region objects.

    Only the first three fields of each row are used, so rows may be
    tuples, lists, or numpy array rows.
    """
    return [Region(row[0], row[1], row[2]) for row in regions]
def compare_sizes(rr, gr):
    """Compare two radii and return the relation from each side's view.

    Returns a pair of flags: "l" (larger), "s" (smaller) or "e" (equal) —
    the first for ``rr`` relative to ``gr``, the second for the reverse.
    """
    if rr == gr:
        return ("e", "e")
    return ("l", "s") if rr > gr else ("s", "l")
def intersection_percent(rcx, rcy, rr, gcx, gcy, gr):
    """Rasterize two circles on a shared canvas and measure their overlap.

    Returns ``(overlap/area1, overlap/area2)`` — the intersection as a
    fraction of each circle's pixel area.
    """
    # Bounding box that contains both circles; both canvases use it so the
    # circles are drawn in a common coordinate frame.
    left = min(rcx - rr, gcx - gr)
    right = max(rcx + rr, gcx + gr)
    top = min(rcy - rr, gcy - gr)
    bottom = max(rcy + rr, gcy + gr)
    shape = (bottom - top + 1, right - left + 1)
    disk_a = np.zeros(shape)
    disk_b = np.zeros(shape)
    # thickness -1 fills the circle with the value 1.
    cv2.circle(disk_a, (rcx - left, rcy - top), rr, 1, -1)
    cv2.circle(disk_b, (gcx - left, gcy - top), gr, 1, -1)
    # Pixels covered by both circles sum to 2.
    overlap = np.count_nonzero(np.add(disk_a, disk_b) == 2)
    area_a = np.count_nonzero(disk_a == 1)
    area_b = np.count_nonzero(disk_b == 1)
    return (overlap / area_a, overlap / area_b)
def compare_data(ground, result):
    """Cross-link overlapping ground-truth and detected regions in place.

    For every (detected, truth) pair whose center distance is strictly
    less than the sum of radii, a symmetric relation is recorded on both
    Region objects: size comparison plus the two mutual intersection
    fractions. Returns the (mutated) ``(ground, result)`` pair.
    """
    for detected in result:
        for truth in ground:
            # Circles can only intersect when centers are closer than r1 + r2.
            if distance((detected.x, detected.y), (truth.x, truth.y)) < detected.r + truth.r:
                sc, reverse_sc = compare_sizes(detected.r, truth.r)
                fit, reverse_fit = intersection_percent(
                    detected.x, detected.y, detected.r,
                    truth.x, truth.y, truth.r)
                detected.add_relation(truth, sc, fit, reverse_fit)
                truth.add_relation(detected, reverse_sc, reverse_fit, fit)
    return (ground, result)
# --- Prepare ground-truth annotations and detections for evaluation ---
# NOTE(review): the key below is the VIA-style annotation id for this image.
test_regions = annotations["austrailian-mango-plantation.jpg2426773"]["regions"]
test_sorted_regions = []
for region in test_regions:
    radius = test_regions[region]["shape_attributes"]["r"]
    # test_mark shifts x so coordinates are relative to the test split.
    cx = test_regions[region]["shape_attributes"]["cx"] - test_mark
    cy = test_regions[region]["shape_attributes"]["cy"]
    upperbound = cy - radius
    lowerbound = cy + radius
    leftbound = cx - radius
    rightbound = cx + radius
    test_sorted_regions.append((upperbound, lowerbound, leftbound, rightbound, cx, cy, radius))
# Sort by left bound (tup[2]).
test_sorted_regions.sort(key=lambda tup: tup[2])
# Keep only annotations whose circle falls (mostly) inside the test image.
test_regions = [r for r in test_sorted_regions if (r[0]+(r[6]*1) >= 0 and r[1]-(r[6]*1) < img_row and r[2]+(r[6]*0.5) >= 0 and r[3]-(r[6]*1) < img_col-test_mark)]
test_regions = np.array(test_regions)[:, 4:]  # keep (cx, cy, radius)
test_final_matches.sort(key=lambda tup: tup[2])
# Drop the first column (presumably the match score — confirm) and keep ints.
test_final_matches = np.array(test_final_matches)[:, 1:].astype('int32')
test_final_matches = convert_to_regions(test_final_matches)
test_regions = convert_to_regions(test_regions)
# Link each detection with every ground-truth circle it overlaps.
ground_to_result, result_to_ground = compare_data(test_regions, test_final_matches)
def get_true_positives(rtg):
    """Return (detected, ground-truth) pairs that mutually prefer each other.

    For every detection with at least one overlap, pick the ground-truth
    region with the highest reverse fit; the pair counts as a true positive
    when that ground-truth region's own best relation (by fit) points back
    at this detection (trivially so when it overlaps only one detection).
    """
    true_positives = []
    for detected in rtg:
        if not detected.relations:
            continue
        # Best ground-truth candidate by reverse fit (tuple index 3).
        # max() keeps the first maximal element, matching the original
        # strict-greater update loop.
        best = max(detected.relations, key=lambda rel: rel[3])
        back_relations = best[0].relations
        if len(back_relations) == 1:
            true_positives.append((detected, best[0]))
        elif len(back_relations) > 1:
            # The ground truth's best detection by fit (index 2) must be us.
            best_back = max(back_relations, key=lambda rel: rel[2])
            if best_back[0] == detected:
                true_positives.append((detected, best[0]))
    return true_positives
# Highlight the detections confirmed as true positives on the test image.
true_positives = get_true_positives(result_to_ground)
test_copy = test.copy()
for tp in true_positives:
    tpr, tpg = tp  # (detected region, matched ground-truth region)
    cv2.circle(test_copy, (tpr.x,tpr.y), tpr.r, (255,255,255), 2)
f = plt.figure(figsize=(15,20))
ax = f.add_subplot(111)
ax.imshow(cv2.cvtColor(test_copy, cv2.COLOR_BGR2RGB))
plt.show()
def get_false_positives(rtg):
    """Return detections that no ground-truth region accounts for.

    A detection is a false positive when it overlaps nothing, or when the
    ground-truth region it fits best is itself better explained (by fit)
    by a different detection.
    """
    false_positives = []
    for detected in rtg:
        if not detected.relations:
            false_positives.append(detected)
            continue
        # Best ground-truth candidate by reverse fit (tuple index 3); max()
        # keeps the first maximal element, like the original update loop.
        best = max(detected.relations, key=lambda rel: rel[3])
        back_relations = best[0].relations
        if len(back_relations) > 1:
            best_back = max(back_relations, key=lambda rel: rel[2])
            if best_back[0] != detected:
                false_positives.append(detected)
    return false_positives
# Highlight the detections flagged as false positives on the test image.
false_positives = get_false_positives(result_to_ground)
test_copy = test.copy()
for fp in false_positives:
    cv2.circle(test_copy, (fp.x,fp.y), fp.r, (255,255,255), 2)
f = plt.figure(figsize=(15,20))
ax = f.add_subplot(111)
ax.imshow(cv2.cvtColor(test_copy, cv2.COLOR_BGR2RGB))
plt.show()
def get_false_negatives(gtr):
    """Return ground-truth regions that no detection accounts for.

    Mirror image of the false-positive logic: a ground-truth region is a
    false negative when it overlaps nothing, or when the detection it fits
    best (by fit) itself prefers (by reverse fit) a different ground-truth
    region.
    """
    false_negatives = []
    for truth in gtr:
        if not truth.relations:
            false_negatives.append(truth)
            continue
        # Best detection candidate by fit (tuple index 2); max() keeps the
        # first maximal element, like the original update loop.
        best = max(truth.relations, key=lambda rel: rel[2])
        back_relations = best[0].relations
        if len(back_relations) > 1:
            best_back = max(back_relations, key=lambda rel: rel[3])
            if best_back[0] != truth:
                false_negatives.append(truth)
    return false_negatives
# Highlight the annotated trees the pipeline missed (false negatives).
false_negatives = get_false_negatives(ground_to_result)
test_copy = test.copy()
for fn in false_negatives:
    cv2.circle(test_copy, (fn.x,fn.y), fn.r, (255,255,255), 2)
f = plt.figure(figsize=(15,20))
ax = f.add_subplot(111)
ax.imshow(cv2.cvtColor(test_copy, cv2.COLOR_BGR2RGB))
plt.show()
# Combined evaluation overlay: true positives in white, false positives in
# (255,0,0) and false negatives in (0,0,255). Colors are specified in
# OpenCV's BGR order, so (255,0,0) is blue and (0,0,255) is red once shown.
test_copy = test.copy()
for tp in true_positives:
    tpr, tpg = tp  # (detected region, matched ground-truth region)
    cv2.circle(test_copy, (tpr.x,tpr.y), tpr.r, (255,255,255), 2)
for fp in false_positives:
    cv2.circle(test_copy, (fp.x,fp.y), fp.r, (255,0,0), 2)
for fn in false_negatives:
    cv2.circle(test_copy, (fn.x,fn.y), fn.r, (0,0,255), 2)
f = plt.figure(figsize=(15,20))
ax = f.add_subplot(111)
ax.imshow(cv2.cvtColor(test_copy, cv2.COLOR_BGR2RGB))
plt.show()
# Save the overlay for the paper.
cv2.imwrite('../misc/paper/second config/results.png', test_copy)
# (notebook cell output) True  -- return value of the cv2.imwrite call above
# --- Summary detection metrics ---
TP = len(true_positives)
FP = len(false_positives)
FN = len(false_negatives)
N = TP + FN  # total annotated trees in the evaluation region
# NOTE(review): TP / (TP + FN + FP) is a Jaccard-style overlap score
# (critical success index), not classification accuracy, although the
# printed label says "Accuracy" — consider renaming in the report.
A = TP / (TP + FN + FP)
print("""True Positives: {}
False Negatives: {}
False Positives: {}
Total Number of Trees: {}
Accuracy: {}""".format(TP, FN, FP, N, A))
# (notebook cell output) True Positives: 33  False Negatives: 29  False Positives: 3  Total Number of Trees: 62  Accuracy: 0.5076923076923077
# Snapshot the whole notebook session so it can be restored later.
# NOTE(review): `dill` is never imported above (the import below is commented
# out), so this line raises NameError unless dill was imported interactively
# — confirm and uncomment the import if this script is meant to run end-to-end.
dill.dump_session('notebook_session_australian_3.db')
# import dill
# dill.load_session('notebook_session_australian_3.db')
# (notebook stderr) /home/gam/anaconda3/lib/python3.6/site-packages/sklearn/ensemble/weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported. It will be removed in a future NumPy release. from numpy.core.umath_tests import inner1d